LIST_HEAD(acpi_drhd_units);
LIST_HEAD(acpi_rmrr_units);
LIST_HEAD(acpi_atsr_units);
-LIST_HEAD(acpi_ioapic_units);
u8 dmar_host_address_width;
return 0;
}
+/*
+ * Return 1 if @ioapic_list contains an acpi_ioapic_unit whose apic_id
+ * equals @apic_id, 0 otherwise.
+ */
+static int acpi_ioapic_device_match(
+ struct list_head *ioapic_list, unsigned int apic_id)
+{
+ struct acpi_ioapic_unit *ioapic;
+ list_for_each_entry( ioapic, ioapic_list, list ) {
+ if (ioapic->apic_id == apic_id)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Find the DRHD (DMAR hardware unit) whose device scope claims the
+ * IO-APIC with the given @apic_id; returns NULL if none matches.
+ */
+struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id)
+{
+ struct acpi_drhd_unit *drhd;
+ list_for_each_entry( drhd, &acpi_drhd_units, list ) {
+ if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) {
+ /* NOTE(review): "%lx" assumes address is unsigned long;
+ * use PRIx64 if drhd->address is u64 — confirm on 32-bit. */
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "ioapic_to_drhd: drhd->address = %lx\n",
+ drhd->address);
+ return drhd;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Find the IOMMU serving the IO-APIC with the given @apic_id by
+ * scanning each DRHD's ioapic_list; returns NULL (with a warning)
+ * if no DRHD claims that IO-APIC.
+ */
+struct iommu * ioapic_to_iommu(unsigned int apic_id)
+{
+ struct acpi_drhd_unit *drhd;
+
+ list_for_each_entry( drhd, &acpi_drhd_units, list ) {
+ if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) {
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "ioapic_to_iommu: drhd->address = %lx\n",
+ drhd->address);
+ return drhd->iommu;
+ }
+ }
+ dprintk(XENLOG_WARNING VTDPREFIX, "returning NULL\n");
+ return NULL;
+}
+
static int acpi_pci_device_match(struct pci_dev *devices, int cnt,
struct pci_dev *dev)
{
if ( acpi_pci_device_match(drhd->devices,
drhd->devices_cnt, dev) )
{
- gdprintk(XENLOG_INFO VTDPREFIX,
- "acpi_find_matched_drhd_unit: drhd->address = %lx\n",
- drhd->address);
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "acpi_find_matched_drhd_unit: drhd->address = %lx\n",
+ drhd->address);
return drhd;
}
}
if ( include_all_drhd )
{
- gdprintk(XENLOG_INFO VTDPREFIX,
- "acpi_find_matched_drhd_unit:include_all_drhd->addr = %lx\n",
- include_all_drhd->address);
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "acpi_find_matched_drhd_unit:include_all_drhd->addr = %lx\n",
+ include_all_drhd->address);
return include_all_drhd;
}
if ( all_ports_atsru )
{
- gdprintk(XENLOG_INFO VTDPREFIX,
- "acpi_find_matched_atsr_unit: all_ports_atsru\n");
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "acpi_find_matched_atsr_unit: all_ports_atsru\n");
return all_ports_atsru;;
}
while ( start < end )
{
scope = start;
- if ( scope->length < MIN_SCOPE_LEN )
+ if ( (scope->length < MIN_SCOPE_LEN) ||
+ (scope->dev_type >= ACPI_DEV_ENTRY_COUNT) )
{
- printk(KERN_WARNING PREFIX "Invalid device scope\n");
+ dprintk(XENLOG_WARNING VTDPREFIX, "Invalid device scope\n");
return -EINVAL;
}
if ( scope->dev_type == ACPI_DEV_ENDPOINT )
{
- printk(KERN_INFO PREFIX
- "found endpoint: bdf = %x:%x:%x\n",
- bus, path->dev, path->fn);
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "found endpoint: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
count++;
}
else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE )
{
- printk(KERN_INFO PREFIX
- "found bridge: bdf = %x:%x:%x\n",
- bus, path->dev, path->fn);
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "found bridge: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
sec_bus = read_pci_config_byte(
bus, path->dev, path->fn, PCI_SECONDARY_BUS);
sub_bus = read_pci_config_byte(
}
else if ( scope->dev_type == ACPI_DEV_IOAPIC )
{
- printk(KERN_INFO PREFIX
- "found IOAPIC: bdf = %x:%x:%x\n",
- bus, path->dev, path->fn);
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "found IOAPIC: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
count++;
}
else
{
- printk(KERN_INFO PREFIX
- "found MSI HPET: bdf = %x:%x:%x\n",
- bus, path->dev, path->fn);
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "found MSI HPET: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
count++;
}
return count;
}
-static int __init acpi_parse_dev_scope(void *start, void *end, int *cnt,
- struct pci_dev **devices)
+static int __init acpi_parse_dev_scope(
+ void *start, void *end, void *acpi_entry, int type)
{
struct acpi_dev_scope *scope;
u8 bus, sub_bus, sec_bus;
u8 dev, func;
u32 l;
+ int *cnt = NULL;
+ struct pci_dev **devices = NULL;
+ struct acpi_drhd_unit *dmaru = (struct acpi_drhd_unit *) acpi_entry;
+ struct acpi_rmrr_unit *rmrru = (struct acpi_rmrr_unit *) acpi_entry;
+ struct acpi_atsr_unit *atsru = (struct acpi_atsr_unit *) acpi_entry;
+
+ /* Select the per-entry-type device counter/array to fill in.
+ * Bail out on an unknown type: falling through would leave
+ * cnt/devices NULL and the later "*cnt = ..." would be a NULL
+ * pointer dereference. */
+ switch (type) {
+ case DMAR_TYPE:
+ cnt = &(dmaru->devices_cnt);
+ devices = &(dmaru->devices);
+ break;
+ case RMRR_TYPE:
+ cnt = &(rmrru->devices_cnt);
+ devices = &(rmrru->devices);
+ break;
+ case ATSR_TYPE:
+ cnt = &(atsru->devices_cnt);
+ devices = &(atsru->devices);
+ break;
+ default:
+ dprintk(XENLOG_ERR VTDPREFIX, "invalid vt-d acpi entry type\n");
+ return -EINVAL;
+ }
+
*cnt = scope_device_count(start, end);
if ( *cnt == 0 )
{
- printk(KERN_INFO PREFIX "acpi_parse_dev_scope: no device\n");
+ dprintk(XENLOG_INFO VTDPREFIX, "acpi_parse_dev_scope: no device\n");
return 0;
}
if ( scope->dev_type == ACPI_DEV_ENDPOINT )
{
- printk(KERN_INFO PREFIX
- "found endpoint: bdf = %x:%x:%x\n",
- bus, path->dev, path->fn);
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "found endpoint: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
pdev->bus = bus;
pdev->devfn = PCI_DEVFN(path->dev, path->fn);
pdev++;
}
else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE )
{
- printk(KERN_INFO PREFIX
- "found bridge: bus = %x dev = %x func = %x\n",
- bus, path->dev, path->fn);
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "found bridge: bus = %x dev = %x func = %x\n",
+ bus, path->dev, path->fn);
sec_bus = read_pci_config_byte(
bus, path->dev, path->fn, PCI_SECONDARY_BUS);
sub_bus = read_pci_config_byte(
acpi_ioapic_unit->ioapic.bdf.bus = bus;
acpi_ioapic_unit->ioapic.bdf.dev = path->dev;
acpi_ioapic_unit->ioapic.bdf.func = path->fn;
- list_add(&acpi_ioapic_unit->list, &acpi_ioapic_units);
- printk(KERN_INFO PREFIX
- "found IOAPIC: bus = %x dev = %x func = %x\n",
- bus, path->dev, path->fn);
+ list_add(&acpi_ioapic_unit->list, &dmaru->ioapic_list);
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "found IOAPIC: bus = %x dev = %x func = %x\n",
+ bus, path->dev, path->fn);
}
else
- printk(KERN_INFO PREFIX
- "found MSI HPET: bus = %x dev = %x func = %x\n",
- bus, path->dev, path->fn);
-
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "found MSI HPET: bus = %x dev = %x func = %x\n",
+ bus, path->dev, path->fn);
start += scope->length;
}
struct acpi_drhd_unit *dmaru;
int ret = 0;
static int include_all;
+ void *dev_scope_start, *dev_scope_end;
dmaru = xmalloc(struct acpi_drhd_unit);
if ( !dmaru )
dmaru->address = drhd->address;
dmaru->include_all = drhd->flags & 1; /* BIT0: INCLUDE_ALL */
- printk(KERN_INFO PREFIX "dmaru->address = %lx\n", dmaru->address);
+ INIT_LIST_HEAD(&dmaru->ioapic_list);
+ dprintk(XENLOG_INFO VTDPREFIX, "dmaru->address = %lx\n", dmaru->address);
- if ( !dmaru->include_all )
- ret = acpi_parse_dev_scope(
- (void *)(drhd + 1),
- ((void *)drhd) + header->length,
- &dmaru->devices_cnt, &dmaru->devices);
- else
+ dev_scope_start = (void *)(drhd + 1);
+ dev_scope_end = ((void *)drhd) + header->length;
+ ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
+ dmaru, DMAR_TYPE);
+
+ if ( dmaru->include_all )
{
- printk(KERN_INFO PREFIX "found INCLUDE_ALL\n");
+ dprintk(XENLOG_INFO VTDPREFIX, "found INCLUDE_ALL\n");
/* Only allow one INCLUDE_ALL */
if ( include_all )
{
- printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
- "device scope is allowed\n");
+ dprintk(XENLOG_WARNING VTDPREFIX,
+ "Only one INCLUDE_ALL device scope is allowed\n");
ret = -EINVAL;
}
include_all = 1;
{
struct acpi_table_rmrr *rmrr = (struct acpi_table_rmrr *)header;
struct acpi_rmrr_unit *rmrru;
+ void *dev_scope_start, *dev_scope_end;
int ret = 0;
rmrru = xmalloc(struct acpi_rmrr_unit);
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
- printk(KERN_INFO PREFIX
- "acpi_parse_one_rmrr: base=%"PRIx64" end=%"PRIx64"\n",
- rmrr->base_address, rmrr->end_address);
-
- ret = acpi_parse_dev_scope(
- (void *)(rmrr + 1),
- ((void*)rmrr) + header->length,
- &rmrru->devices_cnt, &rmrru->devices);
-
+ dev_scope_start = (void *)(rmrr + 1);
+ dev_scope_end = ((void *)rmrr) + header->length;
+ ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
+ rmrru, RMRR_TYPE);
if ( ret || (rmrru->devices_cnt == 0) )
xfree(rmrru);
else
struct acpi_atsr_unit *atsru;
int ret = 0;
static int all_ports;
+ void *dev_scope_start, *dev_scope_end;
atsru = xmalloc(struct acpi_atsr_unit);
if ( !atsru )
atsru->all_ports = atsr->flags & 1; /* BIT0: ALL_PORTS */
if ( !atsru->all_ports )
- ret = acpi_parse_dev_scope(
- (void *)(atsr + 1),
- ((void *)atsr) + header->length,
- &atsru->devices_cnt, &atsru->devices);
- else
{
- printk(KERN_INFO PREFIX "found ALL_PORTS\n");
+ dev_scope_start = (void *)(atsr + 1);
+ dev_scope_end = ((void *)atsr) + header->length;
+ ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
+ atsru, ATSR_TYPE);
+ }
+ else {
+ dprintk(XENLOG_INFO VTDPREFIX, "found ALL_PORTS\n");
/* Only allow one ALL_PORTS */
if ( all_ports )
{
- printk(KERN_WARNING PREFIX "Only one ALL_PORTS "
- "device scope is allowed\n");
+ dprintk(XENLOG_WARNING VTDPREFIX,
+ "Only one ALL_PORTS device scope is allowed\n");
ret = -EINVAL;
}
all_ports = 1;
dmar = (struct acpi_table_dmar *)__acpi_map_table(phys_addr, size);
if ( !dmar )
{
- printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
+ dprintk(XENLOG_WARNING VTDPREFIX, "Unable to map DMAR\n");
return -ENODEV;
}
if ( !dmar->haw )
{
- printk(KERN_WARNING PREFIX "Zero: Invalid DMAR haw\n");
+ dprintk(XENLOG_WARNING VTDPREFIX, "Zero: Invalid DMAR haw\n");
return -EINVAL;
}
dmar_host_address_width = dmar->haw;
- printk(KERN_INFO PREFIX "Host address width %d\n",
- dmar_host_address_width);
+ dprintk(XENLOG_INFO VTDPREFIX, "Host address width %d\n",
+ dmar_host_address_width);
entry_header = (struct acpi_dmar_entry_header *)(dmar + 1);
while ( ((unsigned long)entry_header) <
switch ( entry_header->type )
{
case ACPI_DMAR_DRHD:
- printk(KERN_INFO PREFIX "found ACPI_DMAR_DRHD\n");
+ dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_DRHD\n");
ret = acpi_parse_one_drhd(entry_header);
break;
case ACPI_DMAR_RMRR:
- printk(KERN_INFO PREFIX "found ACPI_DMAR_RMRR\n");
+ dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_RMRR\n");
ret = acpi_parse_one_rmrr(entry_header);
break;
case ACPI_DMAR_ATSR:
- printk(KERN_INFO PREFIX "found ACPI_DMAR_ATSR\n");
+ dprintk(XENLOG_INFO VTDPREFIX, "found ACPI_DMAR_ATSR\n");
ret = acpi_parse_one_atsr(entry_header);
break;
default:
- printk(KERN_WARNING PREFIX "Unknown DMAR structure type\n");
+ dprintk(XENLOG_WARNING VTDPREFIX, "Unknown DMAR structure type\n");
ret = -EINVAL;
break;
}
if ( list_empty(&acpi_drhd_units) )
{
- printk(KERN_ERR PREFIX "No DMAR devices found\n");
+ dprintk(XENLOG_ERR VTDPREFIX, "No DMAR devices found\n");
vtd_enabled = 0;
return -ENODEV;
}
#define DMA_TLB_IVA_HINT(x) ((((u64)x) & 1) << 6)
/* GCMD_REG */
-#define DMA_GCMD_TE (((u64)1) << 31)
-#define DMA_GCMD_SRTP (((u64)1) << 30)
-#define DMA_GCMD_SFL (((u64)1) << 29)
-#define DMA_GCMD_EAFL (((u64)1) << 28)
-#define DMA_GCMD_WBF (((u64)1) << 27)
-#define DMA_GCMD_QIE (((u64)1) << 26)
-#define DMA_GCMD_IRE (((u64)1) << 25)
-#define DMA_GCMD_SIRTP (((u64)1) << 24)
+#define DMA_GCMD_TE (((u64)1) << 31)
+#define DMA_GCMD_SRTP (((u64)1) << 30)
+#define DMA_GCMD_SFL (((u64)1) << 29)
+#define DMA_GCMD_EAFL (((u64)1) << 28)
+#define DMA_GCMD_WBF (((u64)1) << 27)
+#define DMA_GCMD_QIE (((u64)1) << 26)
+#define DMA_GCMD_IRE (((u64)1) << 25)
+#define DMA_GCMD_SIRTP (((u64)1) << 24)
+#define DMA_GCMD_CFI (((u64)1) << 23)
/* GSTS_REG */
-#define DMA_GSTS_TES (((u64)1) << 31)
-#define DMA_GSTS_RTPS (((u64)1) << 30)
-#define DMA_GSTS_FLS (((u64)1) << 29)
-#define DMA_GSTS_AFLS (((u64)1) << 28)
-#define DMA_GSTS_WBFS (((u64)1) << 27)
-#define DMA_GSTS_IRTPS (((u64)1) << 24)
+#define DMA_GSTS_TES (((u64)1) << 31)
+#define DMA_GSTS_RTPS (((u64)1) << 30)
+#define DMA_GSTS_FLS (((u64)1) << 29)
+#define DMA_GSTS_AFLS (((u64)1) << 28)
+#define DMA_GSTS_WBFS (((u64)1) << 27)
#define DMA_GSTS_QIES (((u64)1) <<26)
#define DMA_GSTS_IRES (((u64)1) <<25)
+#define DMA_GSTS_SIRTPS (((u64)1) << 24)
+#define DMA_GSTS_CFIS (((u64)1) <<23)
/* PMEN_REG */
-#define DMA_PMEN_EPM (((u32)1) << 31)
-#define DMA_PMEN_PRS (((u32)1) << 0)
+#define DMA_PMEN_EPM (((u32)1) << 31)
+#define DMA_PMEN_PRS (((u32)1) << 0)
/* CCMD_REG */
#define DMA_CCMD_INVL_GRANU_OFFSET 61
-#define DMA_CCMD_ICC (((u64)1) << 63)
+#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_FECTL_IM (((u64)1) << 31)
/* FSTS_REG */
-#define DMA_FSTS_PPF ((u64)2)
-#define DMA_FSTS_PFO ((u64)1)
+#define DMA_FSTS_PFO ((u64)1 << 0)
+#define DMA_FSTS_PPF ((u64)1 << 1)
+#define DMA_FSTS_AFO ((u64)1 << 2)
+#define DMA_FSTS_APF ((u64)1 << 3)
+#define DMA_FSTS_IQE ((u64)1 << 4)
+#define DMA_FSTS_ICE ((u64)1 << 5)
+#define DMA_FSTS_ITE ((u64)1 << 6)
#define DMA_FSTS_FAULTS (DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE) /* parenthesized so "x & DMA_FSTS_FAULTS" parses as intended */
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* FRCD_REG, 32 bits access */
/* interrupt remap entry */
struct iremap_entry {
+  /* Low 64 bits of the interrupt remap table entry; lo_val gives
+   * raw access to the same bits as the bitfield view. */
+  union {
+    u64 lo_val;
    struct {
-        u64 present : 1,
+        u64 p       : 1,
            fpd     : 1,
            dm      : 1,
            rh      : 1,
            res_2   : 8,
            dst     : 32;
    }lo;
+  };
+  /* High 64 bits: source-id validation fields. */
+  union {
+    u64 hi_val;
    struct {
        u64 sid     : 16,
            sq      : 2,
            svt     : 2,
            res_1   : 44;
    }hi;
+  };
};
#define IREMAP_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct iremap_entry))
#define iremap_present(v) ((v).lo & 1)
#define RESERVED_VAL 0
-#define TYPE_INVAL_CONTEXT 1
-#define TYPE_INVAL_IOTLB 2
-#define TYPE_INVAL_DEVICE_IOTLB 3
-#define TYPE_INVAL_IEC 4
-#define TYPE_INVAL_WAIT 5
+#define TYPE_INVAL_CONTEXT 0x1
+#define TYPE_INVAL_IOTLB 0x2
+#define TYPE_INVAL_DEVICE_IOTLB 0x3
+#define TYPE_INVAL_IEC 0x4
+#define TYPE_INVAL_WAIT 0x5
#define NOTIFY_TYPE_POLL 1
#define NOTIFY_TYPE_INTR 1
#define IEC_GLOBAL_INVL 0
#define IEC_INDEX_INVL 1
+#define IRTA_REG_EIME_SHIFT 11
+#define IRTA_REG_TABLE_SIZE 7 // 4k page = 256 * 16 byte entries
+ // 2^(IRTA_REG_TABLE_SIZE + 1) = 256
+ // IRTA_REG_TABLE_SIZE = 7
#define VTD_PAGE_TABLE_LEVEL_3 3
#define VTD_PAGE_TABLE_LEVEL_4 4
extern struct list_head acpi_rmrr_units;
extern struct list_head acpi_ioapic_units;
+/* Per-IOMMU queued-invalidation state. */
+struct qi_ctrl {
+ struct qinval_entry *qinval; /* queue invalidation page */
+ int qinval_index; /* queue invalidation index */
+ spinlock_t qinval_lock; /* lock for queue invalidation page */
+ spinlock_t qinval_poll_lock; /* lock for queue invalidation poll addr */
+ volatile u32 qinval_poll_status; /* used by poll method to sync */
+};
+
+/* Per-IOMMU interrupt-remapping state. */
+struct ir_ctrl {
+ struct iremap_entry *iremap; /* interrupt remap table */
+ int iremap_index; /* interrupt remap index */
+ spinlock_t iremap_lock; /* lock for irq remapping table */
+};
+
+/* Invalidation hooks: context-cache and IOTLB flush implementations
+ * installed per IOMMU (e.g. register-based vs. queued invalidation). */
+struct iommu_flush {
+ int (*context)(void *iommu, u16 did, u16 source_id, u8 function_mask, u64 type, int non_present_entry_flush);
+ int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order, u64 type, int non_present_entry_flush);
+};
+
+/* Intel-specific per-IOMMU state: queued invalidation, interrupt
+ * remapping, and the installed flush hooks. */
+struct intel_iommu {
+ struct qi_ctrl qi_ctrl;
+ struct ir_ctrl ir_ctrl;
+ struct iommu_flush flush;
+};
+
#endif